reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
}
+/*
+ * Deliver a pending event channel upcall to the guest via its registered
+ * event callback entry point, instead of injecting a virtual external
+ * interrupt.  Saves the interrupted guest state into the privileged
+ * state communication block (PSCB) so the guest can resume it later,
+ * then redirects the trap frame to v->arch.event_callback_ip.
+ *
+ * Called with 'regs' describing the interrupted (guest) context.
+ * No-op when running on the idle vcpu, when the interruption did not
+ * come from guest (user) mode, or when no event is actually pending.
+ */
+void reflect_event(struct pt_regs *regs)
+{
+ /* Restart-instruction slot bits of the interrupted ipsr become the
+  * guest-visible isr value below. */
+ unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
+ struct vcpu *v = current;
+
+ /* Sanity check */
+ if (is_idle_vcpu(v) || !user_mode(regs)) {
+ //printk("WARN: invocation to reflect_event in nested xen\n");
+ return;
+ }
+
+ /* Nothing to deliver. */
+ if (!event_pending(v))
+ return;
+
+ /* Delivering while the guest has interrupt collection (psr.ic)
+  * disabled is suspicious — log it but proceed anyway. */
+ if (!PSCB(v,interrupt_collection_enabled))
+ printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
+ regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
+ /* Snapshot interrupted state into the PSCB for the guest handler. */
+ PSCB(v,unat) = regs->ar_unat; // not sure if this is really needed?
+ PSCB(v,precover_ifs) = regs->cr_ifs;
+ /* Switch the guest to register bank 0, as a real interruption would. */
+ vcpu_bsw0(v);
+ PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
+ PSCB(v,isr) = isr;
+ PSCB(v,iip) = regs->cr_iip;
+ PSCB(v,ifs) = 0;
+ PSCB(v,incomplete_regframe) = 0;
+
+ /* Redirect the trap frame: resume the guest at its event callback,
+  * with psr bits adjusted for delivery.  r31 carries the XSI_IPSR
+  * address for the callback stub (per the hypercall ABI). */
+ regs->cr_iip = v->arch.event_callback_ip;
+ regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
+ regs->r31 = XSI_IPSR;
+
+ /* Mask further upcalls until the guest re-enables them, and turn off
+  * virtual interrupt collection, mirroring real interruption entry. */
+ v->vcpu_info->evtchn_upcall_mask = 1;
+ PSCB(v,interrupt_collection_enabled) = 0;
+}
+
// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
struct vcpu *v = current;
// FIXME: Will this work properly if doing an RFI???
if (!is_idle_domain(d) && user_mode(regs)) {
- //vcpu_poke_timer(v);
if (vcpu_deliverable_interrupts(v))
reflect_extint(regs);
else if (PSCB(v,pending_interruption))
printf("vcpu_pend_interrupt: bad vector\n");
return;
}
- if ( VMX_DOMAIN(vcpu) ) {
- set_bit(vector,VCPU(vcpu,irr));
- } else
- {
- if (test_bit(vector,PSCBX(vcpu,irr))) {
-//printf("vcpu_pend_interrupt: overrun\n");
+
+ if (vcpu->arch.event_callback_ip) {
+ printf("Deprecated interface. Move to new event based solution\n");
+ return;
+ }
+
+ if ( VMX_DOMAIN(vcpu) ) {
+ set_bit(vector,VCPU(vcpu,irr));
+ } else {
+ set_bit(vector,PSCBX(vcpu,irr));
+ PSCB(vcpu,pending_interruption) = 1;
}
- set_bit(vector,PSCBX(vcpu,irr));
- PSCB(vcpu,pending_interruption) = 1;
- }
}
#define IA64_TPR_MMI 0x10000
{
UINT64 *p, *r, bits, bitnum, mask, i, vector;
+ if (vcpu->arch.event_callback_ip)
+ return SPURIOUS_VECTOR;
+
/* Always check pending event, since guest may just ack the
* event injection without handle. Later guest may throw out
* the event itself.
// don't deliver another
return;
}
- vcpu_pend_interrupt(vcpu, itv);
+ if (vcpu->arch.event_callback_ip) {
+ /* A small window may occur when injecting vIRQ while related
+ * handler has not been registered. Don't fire in such case.
+ */
+ if (vcpu->virq_to_evtchn[VIRQ_ITC]) {
+ send_guest_vcpu_virq(vcpu, VIRQ_ITC);
+ PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
+ }
+ } else
+ vcpu_pend_interrupt(vcpu, itv);
}
// returns true if ready to deliver a timer interrupt too early
if ( running )
smp_send_event_check_cpu(v->processor);
- if(!VMX_DOMAIN(v))
- vcpu_pend_interrupt(v, v->domain->shared_info->arch.evtchn_vector);
+ if(!VMX_DOMAIN(v) && !v->arch.event_callback_ip)
+ vcpu_pend_interrupt(v, v->domain->shared_info->arch.evtchn_vector);
}
/* Note: Bitwise operations result in fast code with no branches. */